import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import tensorflow as tf
from keras.models import Sequential
from keras.layers import LSTM, GRU, Conv1D, Dense
from keras.layers import Dropout, Flatten,MaxPooling1D
from keras.callbacks import ModelCheckpoint, EarlyStopping
from sklearn.preprocessing import MinMaxScaler
# Load the 5-year daily OHLCV history for all S&P 500 tickers and peek at it.
df = pd.read_csv('all_stocks_5yr.csv')
df.head()
| | Date | Open | High | Low | Close | Volume | Name |
|---|---|---|---|---|---|---|---|
| 0 | 2012-08-13 | 92.29 | 92.59 | 91.74 | 92.40 | 2075391.0 | MMM |
| 1 | 2012-08-14 | 92.36 | 92.50 | 92.01 | 92.30 | 1843476.0 | MMM |
| 2 | 2012-08-15 | 92.00 | 92.74 | 91.94 | 92.54 | 1983395.0 | MMM |
| 3 | 2012-08-16 | 92.75 | 93.87 | 92.21 | 93.74 | 3395145.0 | MMM |
| 4 | 2012-08-17 | 93.93 | 94.30 | 93.59 | 94.24 | 3069513.0 | MMM |
# Restrict the dataset to the American Airlines (AAL) rows only.
aal = df.loc[df['Name'] == 'AAL']
aal.shape
(926, 7)
# Interactive chart of the daily High and Low price series.
fig = go.Figure(data=[
    go.Scatter(x=aal['Date'], y=aal['High'], name="High"),
    go.Scatter(x=aal['Date'], y=aal['Low'], name='Low'),
])
fig.update_layout(title="American Airline Stock Prices")
# Chronological 70/15/15 split of the High prices (no shuffling:
# later blocks must not leak future information into training).
high_prices = aal['High'].values
n_points = len(high_prices)
trainPortion = round(n_points * 0.7)
valPortion = round(n_points * 0.15)
trainData = high_prices[:trainPortion]
valData = high_prices[trainPortion:trainPortion + valPortion]
testData = high_prices[trainPortion + valPortion:]
print('We have %d training, %d validation, and %d test points' %(len(trainData), len(valData), len(testData)))
We have 648 training, 139 validation, and 139 test points
aal[:trainPortion]['Date'].values
array(['2013-12-09', '2013-12-10', '2013-12-11', '2013-12-12',
'2013-12-13', '2013-12-16', '2013-12-17', '2013-12-18',
'2013-12-19', '2013-12-20', '2013-12-23', '2013-12-24',
'2013-12-26', '2013-12-27', '2013-12-30', '2013-12-31',
'2014-01-02', '2014-01-03', '2014-01-06', '2014-01-07',
'2014-01-08', '2014-01-09', '2014-01-10', '2014-01-13',
'2014-01-14', '2014-01-15', '2014-01-16', '2014-01-17',
'2014-01-21', '2014-01-22', '2014-01-23', '2014-01-24',
'2014-01-27', '2014-01-28', '2014-01-29', '2014-01-30',
'2014-01-31', '2014-02-03', '2014-02-04', '2014-02-05',
'2014-02-06', '2014-02-07', '2014-02-10', '2014-02-11',
'2014-02-12', '2014-02-13', '2014-02-14', '2014-02-18',
'2014-02-19', '2014-02-20', '2014-02-21', '2014-02-24',
'2014-02-25', '2014-02-26', '2014-02-27', '2014-02-28',
'2014-03-03', '2014-03-04', '2014-03-05', '2014-03-06',
'2014-03-07', '2014-03-10', '2014-03-11', '2014-03-12',
'2014-03-13', '2014-03-14', '2014-03-17', '2014-03-18',
'2014-03-19', '2014-03-20', '2014-03-21', '2014-03-24',
'2014-03-25', '2014-03-26', '2014-03-27', '2014-03-28',
'2014-03-31', '2014-04-01', '2014-04-02', '2014-04-03',
'2014-04-04', '2014-04-07', '2014-04-08', '2014-04-09',
'2014-04-10', '2014-04-11', '2014-04-14', '2014-04-15',
'2014-04-16', '2014-04-17', '2014-04-21', '2014-04-22',
'2014-04-23', '2014-04-24', '2014-04-25', '2014-04-28',
'2014-04-29', '2014-04-30', '2014-05-01', '2014-05-02',
'2014-05-05', '2014-05-06', '2014-05-07', '2014-05-08',
'2014-05-09', '2014-05-12', '2014-05-13', '2014-05-14',
'2014-05-15', '2014-05-16', '2014-05-19', '2014-05-20',
'2014-05-21', '2014-05-22', '2014-05-23', '2014-05-27',
'2014-05-28', '2014-05-29', '2014-05-30', '2014-06-02',
'2014-06-03', '2014-06-04', '2014-06-05', '2014-06-06',
'2014-06-09', '2014-06-10', '2014-06-11', '2014-06-12',
'2014-06-13', '2014-06-16', '2014-06-17', '2014-06-18',
'2014-06-19', '2014-06-20', '2014-06-23', '2014-06-24',
'2014-06-25', '2014-06-26', '2014-06-27', '2014-06-30',
'2014-07-01', '2014-07-02', '2014-07-03', '2014-07-07',
'2014-07-08', '2014-07-09', '2014-07-10', '2014-07-11',
'2014-07-14', '2014-07-15', '2014-07-16', '2014-07-17',
'2014-07-18', '2014-07-21', '2014-07-22', '2014-07-23',
'2014-07-24', '2014-07-25', '2014-07-28', '2014-07-29',
'2014-07-30', '2014-07-31', '2014-08-01', '2014-08-04',
'2014-08-05', '2014-08-06', '2014-08-07', '2014-08-08',
'2014-08-11', '2014-08-12', '2014-08-13', '2014-08-14',
'2014-08-15', '2014-08-18', '2014-08-19', '2014-08-20',
'2014-08-21', '2014-08-22', '2014-08-25', '2014-08-26',
'2014-08-27', '2014-08-28', '2014-08-29', '2014-09-02',
'2014-09-03', '2014-09-04', '2014-09-05', '2014-09-08',
'2014-09-09', '2014-09-10', '2014-09-11', '2014-09-12',
'2014-09-15', '2014-09-16', '2014-09-17', '2014-09-18',
'2014-09-19', '2014-09-22', '2014-09-23', '2014-09-24',
'2014-09-25', '2014-09-26', '2014-09-29', '2014-09-30',
'2014-10-01', '2014-10-02', '2014-10-03', '2014-10-06',
'2014-10-07', '2014-10-08', '2014-10-09', '2014-10-10',
'2014-10-13', '2014-10-14', '2014-10-15', '2014-10-16',
'2014-10-17', '2014-10-20', '2014-10-21', '2014-10-22',
'2014-10-23', '2014-10-24', '2014-10-27', '2014-10-28',
'2014-10-29', '2014-10-30', '2014-10-31', '2014-11-03',
'2014-11-04', '2014-11-05', '2014-11-06', '2014-11-07',
'2014-11-10', '2014-11-11', '2014-11-12', '2014-11-13',
'2014-11-14', '2014-11-17', '2014-11-18', '2014-11-19',
'2014-11-20', '2014-11-21', '2014-11-24', '2014-11-25',
'2014-11-26', '2014-11-28', '2014-12-01', '2014-12-02',
'2014-12-03', '2014-12-04', '2014-12-05', '2014-12-08',
'2014-12-09', '2014-12-10', '2014-12-11', '2014-12-12',
'2014-12-15', '2014-12-16', '2014-12-17', '2014-12-18',
'2014-12-19', '2014-12-22', '2014-12-23', '2014-12-24',
'2014-12-26', '2014-12-29', '2014-12-30', '2014-12-31',
'2015-01-02', '2015-01-05', '2015-01-06', '2015-01-07',
'2015-01-08', '2015-01-09', '2015-01-12', '2015-01-13',
'2015-01-14', '2015-01-15', '2015-01-16', '2015-01-20',
'2015-01-21', '2015-01-22', '2015-01-23', '2015-01-26',
'2015-01-27', '2015-01-28', '2015-01-29', '2015-01-30',
'2015-02-02', '2015-02-03', '2015-02-04', '2015-02-05',
'2015-02-06', '2015-02-09', '2015-02-10', '2015-02-11',
'2015-02-12', '2015-02-13', '2015-02-17', '2015-02-18',
'2015-02-19', '2015-02-20', '2015-02-23', '2015-02-24',
'2015-02-25', '2015-02-26', '2015-02-27', '2015-03-02',
'2015-03-03', '2015-03-04', '2015-03-05', '2015-03-06',
'2015-03-09', '2015-03-10', '2015-03-11', '2015-03-12',
'2015-03-13', '2015-03-16', '2015-03-17', '2015-03-18',
'2015-03-19', '2015-03-20', '2015-03-23', '2015-03-24',
'2015-03-25', '2015-03-26', '2015-03-27', '2015-03-30',
'2015-03-31', '2015-04-01', '2015-04-02', '2015-04-06',
'2015-04-07', '2015-04-08', '2015-04-09', '2015-04-10',
'2015-04-13', '2015-04-14', '2015-04-15', '2015-04-16',
'2015-04-17', '2015-04-20', '2015-04-21', '2015-04-22',
'2015-04-23', '2015-04-24', '2015-04-27', '2015-04-28',
'2015-04-29', '2015-04-30', '2015-05-01', '2015-05-04',
'2015-05-05', '2015-05-06', '2015-05-07', '2015-05-08',
'2015-05-11', '2015-05-12', '2015-05-13', '2015-05-14',
'2015-05-15', '2015-05-18', '2015-05-19', '2015-05-20',
'2015-05-21', '2015-05-22', '2015-05-26', '2015-05-27',
'2015-05-28', '2015-05-29', '2015-06-01', '2015-06-02',
'2015-06-03', '2015-06-04', '2015-06-05', '2015-06-08',
'2015-06-09', '2015-06-10', '2015-06-11', '2015-06-12',
'2015-06-15', '2015-06-16', '2015-06-17', '2015-06-18',
'2015-06-19', '2015-06-22', '2015-06-23', '2015-06-24',
'2015-06-25', '2015-06-26', '2015-06-29', '2015-06-30',
'2015-07-01', '2015-07-02', '2015-07-06', '2015-07-07',
'2015-07-08', '2015-07-09', '2015-07-10', '2015-07-13',
'2015-07-14', '2015-07-15', '2015-07-16', '2015-07-17',
'2015-07-20', '2015-07-21', '2015-07-22', '2015-07-23',
'2015-07-24', '2015-07-27', '2015-07-28', '2015-07-29',
'2015-07-30', '2015-07-31', '2015-08-03', '2015-08-04',
'2015-08-05', '2015-08-06', '2015-08-07', '2015-08-10',
'2015-08-11', '2015-08-12', '2015-08-13', '2015-08-14',
'2015-08-17', '2015-08-18', '2015-08-19', '2015-08-20',
'2015-08-21', '2015-08-24', '2015-08-25', '2015-08-26',
'2015-08-27', '2015-08-28', '2015-08-31', '2015-09-01',
'2015-09-02', '2015-09-03', '2015-09-04', '2015-09-08',
'2015-09-09', '2015-09-10', '2015-09-11', '2015-09-14',
'2015-09-15', '2015-09-16', '2015-09-17', '2015-09-18',
'2015-09-21', '2015-09-22', '2015-09-23', '2015-09-24',
'2015-09-25', '2015-09-28', '2015-09-29', '2015-09-30',
'2015-10-01', '2015-10-02', '2015-10-05', '2015-10-06',
'2015-10-07', '2015-10-08', '2015-10-09', '2015-10-12',
'2015-10-13', '2015-10-14', '2015-10-15', '2015-10-16',
'2015-10-19', '2015-10-20', '2015-10-21', '2015-10-22',
'2015-10-23', '2015-10-26', '2015-10-27', '2015-10-28',
'2015-10-29', '2015-10-30', '2015-11-02', '2015-11-03',
'2015-11-04', '2015-11-05', '2015-11-06', '2015-11-09',
'2015-11-10', '2015-11-11', '2015-11-12', '2015-11-13',
'2015-11-16', '2015-11-17', '2015-11-18', '2015-11-19',
'2015-11-20', '2015-11-23', '2015-11-24', '2015-11-25',
'2015-11-27', '2015-11-30', '2015-12-01', '2015-12-02',
'2015-12-03', '2015-12-04', '2015-12-07', '2015-12-08',
'2015-12-09', '2015-12-10', '2015-12-11', '2015-12-14',
'2015-12-15', '2015-12-16', '2015-12-17', '2015-12-18',
'2015-12-21', '2015-12-22', '2015-12-23', '2015-12-24',
'2015-12-28', '2015-12-29', '2015-12-30', '2015-12-31',
'2016-01-04', '2016-01-05', '2016-01-06', '2016-01-07',
'2016-01-08', '2016-01-11', '2016-01-12', '2016-01-13',
'2016-01-14', '2016-01-15', '2016-01-19', '2016-01-20',
'2016-01-21', '2016-01-22', '2016-01-25', '2016-01-26',
'2016-01-27', '2016-01-28', '2016-01-29', '2016-02-01',
'2016-02-02', '2016-02-03', '2016-02-04', '2016-02-05',
'2016-02-08', '2016-02-09', '2016-02-10', '2016-02-11',
'2016-02-12', '2016-02-16', '2016-02-17', '2016-02-18',
'2016-02-19', '2016-02-22', '2016-02-23', '2016-02-24',
'2016-02-25', '2016-02-26', '2016-02-29', '2016-03-01',
'2016-03-02', '2016-03-03', '2016-03-04', '2016-03-07',
'2016-03-08', '2016-03-09', '2016-03-10', '2016-03-11',
'2016-03-14', '2016-03-15', '2016-03-16', '2016-03-17',
'2016-03-18', '2016-03-21', '2016-03-22', '2016-03-23',
'2016-03-24', '2016-03-28', '2016-03-29', '2016-03-30',
'2016-03-31', '2016-04-01', '2016-04-04', '2016-04-05',
'2016-04-06', '2016-04-07', '2016-04-08', '2016-04-11',
'2016-04-12', '2016-04-13', '2016-04-14', '2016-04-15',
'2016-04-18', '2016-04-19', '2016-04-20', '2016-04-21',
'2016-04-22', '2016-04-25', '2016-04-26', '2016-04-27',
'2016-04-28', '2016-04-29', '2016-05-02', '2016-05-03',
'2016-05-04', '2016-05-05', '2016-05-06', '2016-05-09',
'2016-05-10', '2016-05-11', '2016-05-12', '2016-05-13',
'2016-05-16', '2016-05-17', '2016-05-18', '2016-05-19',
'2016-05-20', '2016-05-23', '2016-05-24', '2016-05-25',
'2016-05-26', '2016-05-27', '2016-05-31', '2016-06-01',
'2016-06-02', '2016-06-03', '2016-06-06', '2016-06-07',
'2016-06-08', '2016-06-09', '2016-06-10', '2016-06-13',
'2016-06-14', '2016-06-15', '2016-06-16', '2016-06-17',
'2016-06-20', '2016-06-21', '2016-06-22', '2016-06-23',
'2016-06-24', '2016-06-27', '2016-06-28', '2016-06-29',
'2016-06-30', '2016-07-01', '2016-07-05', '2016-07-06'],
dtype=object)
# Visualize how the chronological split partitions the High series.
fig = go.Figure()
for start, stop, label in [
        (None, trainPortion, 'Train Set'),
        (trainPortion, trainPortion + valPortion, 'Validation Set'),
        (trainPortion + valPortion, None, 'Test Set')]:
    segment = aal[start:stop]
    fig.add_trace(go.Scatter(x=segment['Date'], y=segment['High'], name=label))
# MinMaxScaler maps the training price range onto [0, 1].
sc = MinMaxScaler(feature_range=(0, 1))
# The scaler expects 2-D input of shape (n_samples, n_features); each split
# is currently a 1-D array of shape (n,), so add a singleton feature axis.
print(trainData.shape)
trainData = trainData.reshape(-1, 1)  # (648,) -> (648, 1)
print(trainData.shape)
valData = valData.reshape(-1, 1)
testData = testData.reshape(-1, 1)
(648,) (648, 1)
# Fit the scaler on the training split only, then apply that same mapping
# to validation and test to avoid look-ahead leakage.
trainNorm = sc.fit_transform(trainData)
valNorm = sc.transform(valData)
testNorm = sc.transform(testData)
valNorm
array([[0.13857557],
[0.16467934],
[0.20206252],
[0.31324525],
[0.326136 ],
[0.35514019],
[0.35095069],
[0.35997422],
[0.35159523],
[0.37222043],
[0.33483725],
[0.36706413],
[0.36190783],
[0.37093136],
[0.36835321],
[0.36609733],
[0.37286497],
[0.34096036],
[0.32968095],
[0.28327425],
[0.27715114],
[0.30776668],
[0.31292298],
[0.33290364],
[0.30873348],
[0.31936835],
[0.32226877],
[0.35062842],
[0.37447631],
[0.37318724],
[0.37157589],
[0.37286497],
[0.36448598],
[0.36996455],
[0.38092169],
[0.37931034],
[0.37608766],
[0.3622301 ],
[0.39284563],
[0.38059942],
[0.37189816],
[0.37157589],
[0.38027715],
[0.46245569],
[0.45891073],
[0.45665485],
[0.42571705],
[0.42346117],
[0.38768933],
[0.36932001],
[0.36061876],
[0.35256204],
[0.34450532],
[0.32162423],
[0.34998389],
[0.33838221],
[0.33451499],
[0.35288431],
[0.37673219],
[0.36835321],
[0.38092169],
[0.40541412],
[0.4386078 ],
[0.41733806],
[0.42056075],
[0.42893974],
[0.44408637],
[0.47760232],
[0.42571705],
[0.46181115],
[0.47534644],
[0.46922333],
[0.47437963],
[0.50402836],
[0.50789558],
[0.48565904],
[0.49564937],
[0.49178215],
[0.48630358],
[0.48372543],
[0.49629391],
[0.50402836],
[0.50531743],
[0.48598131],
[0.48565904],
[0.4850145 ],
[0.53109894],
[0.53754431],
[0.5665485 ],
[0.5871737 ],
[0.57299388],
[0.59426362],
[0.6480825 ],
[0.6326136 ],
[0.66645182],
[0.68707702],
[0.69545601],
[0.70190139],
[0.68385433],
[0.69900097],
[0.6996455 ],
[0.70802449],
[0.69900097],
[0.70641315],
[0.69900097],
[0.68868837],
[0.68901063],
[0.76603287],
[0.75507573],
[0.82081856],
[0.77892362],
[0.75153078],
[0.75990976],
[0.79858202],
[0.77022237],
[0.75507573],
[0.78407992],
[0.79697067],
[0.78246858],
[0.76281018],
[0.76538833],
[0.76248791],
[0.73573961],
[0.72478247],
[0.71446987],
[0.71769256],
[0.70125685],
[0.6944892 ],
[0.71446987],
[0.75217531],
[0.79697067],
[0.76796648],
[0.74959716],
[0.72768289],
[0.72929423],
[0.73864003],
[0.74089591],
[0.73864003],
[0.72929423]])
# Window size fed to the model and how far past the window we predict.
look_back = 7
foresight = 6
# Manually build the first (input window, target) pair to illustrate the
# sequence construction before wrapping it in a function.
X, Y = [], []
X.append(valNorm[:look_back, 0])
Y.append(valNorm[look_back + foresight])
print(np.array(X))
print(np.array(Y))
[[0.13857557 0.16467934 0.20206252 0.31324525 0.326136 0.35514019 0.35095069]] [[0.37093136]]
# Second pair: slide the window one step forward.
X.append(valNorm[1:1 + look_back, 0])
Y.append(valNorm[1 + look_back + foresight])
print(np.array(X))
print(np.array(Y))
[[0.13857557 0.16467934 0.20206252 0.31324525 0.326136 0.35514019 0.35095069] [0.16467934 0.20206252 0.31324525 0.326136 0.35514019 0.35095069 0.35997422]] [[0.37093136] [0.36835321]]
def createSeq(dataset, look_back, foresight):
    """Slice a series into supervised-learning (window, target) pairs.

    dataset   : 2-D array whose first column holds the series values.
    look_back : number of consecutive observations per input window.
    foresight : gap between the window's end and the predicted point, so
                each target sits look_back + foresight steps after its
                window's start index.

    Returns (X, Y): X has shape (n, look_back); Y has shape (n,).
    """
    n_pairs = len(dataset) - look_back - foresight
    windows = [dataset[start:start + look_back, 0] for start in range(n_pairs)]
    targets = [dataset[start + look_back + foresight, 0] for start in range(n_pairs)]
    return np.array(windows), np.array(targets)
# Build the training pairs: a 7-day window predicts the price 13 steps
# after the window's start (i.e. 6 days beyond the window's end).
trainNormX, trainNormY = createSeq(trainNorm, look_back=7, foresight=6)
print(trainNormX.shape, trainNormY.shape)
(635, 7) (635,)
print(trainNormX[0,:],trainNormY[0])
[0.00870126 0. 0.06542056 0.04962939 0.03641637 0.051563 0.04576217] 0.029970995810505996
# Same windowing for the validation and test splits.
valNormX, valNormY = createSeq(valNorm, look_back=7, foresight=6)
testNormX, testNormY = createSeq(testNorm, look_back=7, foresight=6)
# Reshape Data for RNN
# trainNormX = np.reshape(trainNormX, (trainNormX.shape[0],trainNormX.shape[1],1))
# valNormX = np.reshape(valNormX, (valNormX.shape[0],valNormX.shape[1],1))
# testNormX = np.reshape(testNormX, (testNormX.shape[0], testNormX.shape[1],1))
# print(trainNormX.shape, valNormX.shape, testNormX.shape)
# NOTE(review): the 3-D reshape above is disabled yet the RNNs train on the
# 2-D (samples, timesteps) arrays — presumably this TF version adds the
# trailing feature axis implicitly; confirm before relying on it.
# Recurrent baseline: one 32-unit LSTM with light input/recurrent dropout
# feeding a single linear neuron that predicts the normalized price.
model = Sequential([
    LSTM(32, input_shape=(7, 1), dropout=0.1, recurrent_dropout=0.1),
    Dense(1, activation='linear'),
])
model.compile(loss='mae', optimizer='adam', metrics=['mean_absolute_error'])
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
lstm (LSTM) (None, 32) 4352
dense (Dense) (None, 1) 33
=================================================================
Total params: 4,385
Trainable params: 4,385
Non-trainable params: 0
_________________________________________________________________
2022-04-27 21:02:25.813473: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
# network_name = 'lstm'
# filepath = network_name + "_epoch-{epoch:02d}-loss-{loss:.4f}-.hdf5"
# checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
# Stop once val_loss has not improved for 5 epochs and roll the model back
# to the best weights seen during training.
checkpoint = EarlyStopping(monitor='val_loss', patience=5,
                           verbose=1, mode='auto', restore_best_weights=True)
callbacks_list = [checkpoint]
network = model.fit(trainNormX, trainNormY,
                    validation_data=(valNormX, valNormY),
                    epochs=100, batch_size=64, callbacks=callbacks_list)
Epoch 1/100 10/10 [==============================] - 2s 34ms/step - loss: 0.5538 - mean_absolute_error: 0.5538 - val_loss: 0.4494 - val_mean_absolute_error: 0.4494 Epoch 2/100 10/10 [==============================] - 0s 9ms/step - loss: 0.3531 - mean_absolute_error: 0.3531 - val_loss: 0.2392 - val_mean_absolute_error: 0.2392 Epoch 3/100 10/10 [==============================] - 0s 10ms/step - loss: 0.1480 - mean_absolute_error: 0.1480 - val_loss: 0.0708 - val_mean_absolute_error: 0.0708 Epoch 4/100 10/10 [==============================] - 0s 9ms/step - loss: 0.1099 - mean_absolute_error: 0.1099 - val_loss: 0.0696 - val_mean_absolute_error: 0.0696 Epoch 5/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0984 - mean_absolute_error: 0.0984 - val_loss: 0.0898 - val_mean_absolute_error: 0.0898 Epoch 6/100 10/10 [==============================] - 0s 8ms/step - loss: 0.0950 - mean_absolute_error: 0.0950 - val_loss: 0.0680 - val_mean_absolute_error: 0.0680 Epoch 7/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0933 - mean_absolute_error: 0.0933 - val_loss: 0.0640 - val_mean_absolute_error: 0.0640 Epoch 8/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0886 - mean_absolute_error: 0.0886 - val_loss: 0.0678 - val_mean_absolute_error: 0.0678 Epoch 9/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0870 - mean_absolute_error: 0.0870 - val_loss: 0.0612 - val_mean_absolute_error: 0.0612 Epoch 10/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0860 - mean_absolute_error: 0.0860 - val_loss: 0.0680 - val_mean_absolute_error: 0.0680 Epoch 11/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0898 - mean_absolute_error: 0.0898 - val_loss: 0.0614 - val_mean_absolute_error: 0.0614 Epoch 12/100 10/10 [==============================] - 0s 8ms/step - loss: 0.0860 - mean_absolute_error: 0.0860 - val_loss: 0.0658 - val_mean_absolute_error: 0.0658 Epoch 13/100 10/10 
[==============================] - 0s 9ms/step - loss: 0.0880 - mean_absolute_error: 0.0880 - val_loss: 0.0588 - val_mean_absolute_error: 0.0588 Epoch 14/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0864 - mean_absolute_error: 0.0864 - val_loss: 0.0651 - val_mean_absolute_error: 0.0651 Epoch 15/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0840 - mean_absolute_error: 0.0840 - val_loss: 0.0566 - val_mean_absolute_error: 0.0566 Epoch 16/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0811 - mean_absolute_error: 0.0811 - val_loss: 0.0629 - val_mean_absolute_error: 0.0629 Epoch 17/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0864 - mean_absolute_error: 0.0864 - val_loss: 0.0578 - val_mean_absolute_error: 0.0578 Epoch 18/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0845 - mean_absolute_error: 0.0845 - val_loss: 0.0626 - val_mean_absolute_error: 0.0626 Epoch 19/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0883 - mean_absolute_error: 0.0883 - val_loss: 0.0612 - val_mean_absolute_error: 0.0612 Epoch 20/100 9/10 [==========================>...] - ETA: 0s - loss: 0.0859 - mean_absolute_error: 0.0859Restoring model weights from the end of the best epoch: 15. 10/10 [==============================] - 0s 9ms/step - loss: 0.0849 - mean_absolute_error: 0.0849 - val_loss: 0.0631 - val_mean_absolute_error: 0.0631 Epoch 20: early stopping
# Quick matplotlib view of the LSTM learning curves.
for key, label in (('loss', 'loss'), ('val_loss', 'val loss')):
    plt.plot(network.history[key], label=label)
plt.legend()
plt.show()
# EarlyStopping(restore_best_weights=True) leaves the model at its BEST
# epoch, so report the minimum validation loss — the last epoch's value
# is, by construction, never the best when training stops early.
valMae = round(min(network.history['val_loss']), 2)
fig = go.Figure()
fig.add_trace(go.Scatter(y=network.history['loss'],
                         mode='lines',
                         name='Training Error'))
fig.add_trace(go.Scatter(y=network.history['val_loss'],
                         mode='lines',
                         name='Validation Error'))
fig.update_layout(yaxis_title='Mean Absolute Error',
                  xaxis_title='epoch',
                  title_text='Normalized MAE Validation = ' +
                             str(valMae))
fig.show()
# Overlay predicted vs. actual test-set prices, back in dollar scale.
preds = model.predict(testNormX)
predicted = sc.inverse_transform(preds.reshape(-1, 1))
actual = sc.inverse_transform(testNormY.reshape(-1, 1))
plt.figure(figsize=(12, 6))
plt.plot(predicted, label='Predicted', color='orange')
plt.plot(actual, label='Actual')
plt.legend()
plt.show()
# Evaluate the LSTM on the held-out test set in the original price scale.
testNormPred = model.predict(testNormX)
testPred = sc.inverse_transform(testNormPred)
testY = sc.inverse_transform(testNormY.reshape(-1, 1))
testMae = tf.keras.metrics.mean_absolute_error(testY, testPred)
fig = go.Figure()
for series, label in ((testPred, 'Model Predictions on Test Set'),
                      (testY, 'Target Values for the Test Set')):
    fig.add_trace(go.Scatter(y=series.reshape(-1,), mode='markers', name=label))
fig.update_layout(title_text='Unnormalized MAE Test = '
                             + str(np.mean(testMae)))
fig.show()
# (Repeated view) predicted vs. actual test prices for the LSTM.
preds = model.predict(testNormX)
predicted = sc.inverse_transform(preds.reshape(-1, 1))
actual = sc.inverse_transform(testNormY.reshape(-1, 1))
plt.figure(figsize=(12, 6))
plt.plot(predicted, label='Predicted', color='orange')
plt.plot(actual, label='Actual')
plt.legend()
plt.show()
# Second recurrent baseline: swap the LSTM cell for a GRU (fewer params,
# same 32-unit width and dropout settings).
model = Sequential([
    GRU(32, input_shape=(7, 1), dropout=0.1, recurrent_dropout=0.1),
    Dense(1, activation='linear'),
])
model.compile(loss='mae', optimizer='adam', metrics=['mean_absolute_error'])
model.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
gru (GRU) (None, 32) 3360
dense_1 (Dense) (None, 1) 33
=================================================================
Total params: 3,393
Trainable params: 3,393
Non-trainable params: 0
_________________________________________________________________
# network_name = 'gru'
# filepath = network_name + "_epoch-{epoch:02d}-loss-{loss:.4f}-.hdf5"
# checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=0, save_best_only=True, mode='min')
# Same early-stopping regime as the LSTM for a fair comparison.
checkpoint = EarlyStopping(monitor='val_loss', patience=5,
                           verbose=1, mode='auto', restore_best_weights=True)
callbacks_list = [checkpoint]
network = model.fit(trainNormX, trainNormY,
                    validation_data=(valNormX, valNormY),
                    epochs=100, batch_size=64, callbacks=callbacks_list)
Epoch 1/100 10/10 [==============================] - 2s 32ms/step - loss: 0.3067 - mean_absolute_error: 0.3067 - val_loss: 0.1753 - val_mean_absolute_error: 0.1753 Epoch 2/100 10/10 [==============================] - 0s 9ms/step - loss: 0.1105 - mean_absolute_error: 0.1105 - val_loss: 0.0697 - val_mean_absolute_error: 0.0697 Epoch 3/100 10/10 [==============================] - 0s 9ms/step - loss: 0.1099 - mean_absolute_error: 0.1099 - val_loss: 0.0603 - val_mean_absolute_error: 0.0603 Epoch 4/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0932 - mean_absolute_error: 0.0932 - val_loss: 0.0754 - val_mean_absolute_error: 0.0754 Epoch 5/100 10/10 [==============================] - 0s 7ms/step - loss: 0.0892 - mean_absolute_error: 0.0892 - val_loss: 0.0591 - val_mean_absolute_error: 0.0591 Epoch 6/100 10/10 [==============================] - 0s 8ms/step - loss: 0.0873 - mean_absolute_error: 0.0873 - val_loss: 0.0603 - val_mean_absolute_error: 0.0603 Epoch 7/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0867 - mean_absolute_error: 0.0867 - val_loss: 0.0652 - val_mean_absolute_error: 0.0652 Epoch 8/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0855 - mean_absolute_error: 0.0855 - val_loss: 0.0579 - val_mean_absolute_error: 0.0579 Epoch 9/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0868 - mean_absolute_error: 0.0868 - val_loss: 0.0595 - val_mean_absolute_error: 0.0595 Epoch 10/100 10/10 [==============================] - 0s 8ms/step - loss: 0.0865 - mean_absolute_error: 0.0865 - val_loss: 0.0607 - val_mean_absolute_error: 0.0607 Epoch 11/100 10/10 [==============================] - 0s 8ms/step - loss: 0.0857 - mean_absolute_error: 0.0857 - val_loss: 0.0559 - val_mean_absolute_error: 0.0559 Epoch 12/100 10/10 [==============================] - 0s 9ms/step - loss: 0.0829 - mean_absolute_error: 0.0829 - val_loss: 0.0614 - val_mean_absolute_error: 0.0614 Epoch 13/100 10/10 
[==============================] - 0s 8ms/step - loss: 0.0861 - mean_absolute_error: 0.0861 - val_loss: 0.0602 - val_mean_absolute_error: 0.0602 Epoch 14/100 10/10 [==============================] - 0s 8ms/step - loss: 0.0813 - mean_absolute_error: 0.0813 - val_loss: 0.0561 - val_mean_absolute_error: 0.0561 Epoch 15/100 10/10 [==============================] - 0s 8ms/step - loss: 0.0826 - mean_absolute_error: 0.0826 - val_loss: 0.0649 - val_mean_absolute_error: 0.0649 Epoch 16/100 10/10 [==============================] - ETA: 0s - loss: 0.0834 - mean_absolute_error: 0.0834Restoring model weights from the end of the best epoch: 11. 10/10 [==============================] - 0s 9ms/step - loss: 0.0834 - mean_absolute_error: 0.0834 - val_loss: 0.0568 - val_mean_absolute_error: 0.0568 Epoch 16: early stopping
# Quick matplotlib view of the GRU learning curves.
for key, label in (('loss', 'loss'), ('val_loss', 'val loss')):
    plt.plot(network.history[key], label=label)
plt.legend()
plt.show()
# EarlyStopping(restore_best_weights=True) leaves the model at its BEST
# epoch, so report the minimum validation loss — the last epoch's value
# is, by construction, never the best when training stops early.
valMae = round(min(network.history['val_loss']), 2)
fig = go.Figure()
fig.add_trace(go.Scatter(y=network.history['loss'],
                         mode='lines',
                         name='Training Error'))
fig.add_trace(go.Scatter(y=network.history['val_loss'],
                         mode='lines',
                         name='Validation Error'))
fig.update_layout(yaxis_title='Mean Absolute Error',
                  xaxis_title='epoch',
                  title_text='Normalized MAE Validation = ' +
                             str(valMae))
fig.show()
# Evaluate the GRU on the held-out test set in the original price scale.
testNormPred = model.predict(testNormX)
testPred = sc.inverse_transform(testNormPred)
testY = sc.inverse_transform(testNormY.reshape(-1, 1))
testMae = tf.keras.metrics.mean_absolute_error(testY, testPred)
fig = go.Figure()
for series, label in ((testPred, 'Model Predictions on Test Set'),
                      (testY, 'Target Values for the Test Set')):
    fig.add_trace(go.Scatter(y=series.reshape(-1,), mode='markers', name=label))
fig.update_layout(title_text='Unnormalized MAE Test = '
                             + str(np.mean(testMae)))
fig.show()
# Overlay GRU predictions against the actual test prices.
preds = model.predict(testNormX)
predicted = sc.inverse_transform(preds.reshape(-1, 1))
actual = sc.inverse_transform(testNormY.reshape(-1, 1))
plt.figure(figsize=(12, 6))
plt.plot(predicted, label='Predicted', color='orange')
plt.plot(actual, label='Actual')
plt.legend()
plt.show()
# Convolutional baseline. The original stack fed the pooled (1, 64) tensor
# straight into Dense, giving a (None, 1, 1) output that only lined up with
# the (samples,) targets via broadcasting (see the earlier model summary).
# Flatten collapses the pooled tensor so the network emits one scalar per
# sample; downstream code that reshapes predictions stays compatible.
model = Sequential([
    Conv1D(64, kernel_size=5, input_shape=(7, 1), activation='relu'),
    MaxPooling1D(pool_size=2),
    Flatten(),  # (1, 64) -> (64,) so Dense outputs shape (None, 1)
    Dense(1, activation='linear'),
])
model.compile(loss='mae', optimizer='adam', metrics=['mean_absolute_error'])
model.summary()
Model: "sequential_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv1d (Conv1D) (None, 3, 64) 384
max_pooling1d (MaxPooling1D (None, 1, 64) 0
)
dense_2 (Dense) (None, 1, 1) 65
=================================================================
Total params: 449
Trainable params: 449
Non-trainable params: 0
_________________________________________________________________
# Same early-stopping regime as the recurrent models.
checkpoint = EarlyStopping(monitor='val_loss', patience=5,
                           verbose=1, mode='auto', restore_best_weights=True)
callbacks_list = [checkpoint]
network = model.fit(trainNormX, trainNormY,
                    validation_data=(valNormX, valNormY),
                    epochs=100, batch_size=64, callbacks=callbacks_list)
Epoch 1/100 10/10 [==============================] - 1s 13ms/step - loss: 0.4987 - mean_absolute_error: 0.4987 - val_loss: 0.3790 - val_mean_absolute_error: 0.3790 Epoch 2/100 10/10 [==============================] - 0s 4ms/step - loss: 0.2902 - mean_absolute_error: 0.2902 - val_loss: 0.1648 - val_mean_absolute_error: 0.1648 Epoch 3/100 10/10 [==============================] - 0s 4ms/step - loss: 0.1931 - mean_absolute_error: 0.1931 - val_loss: 0.0900 - val_mean_absolute_error: 0.0900 Epoch 4/100 10/10 [==============================] - 0s 4ms/step - loss: 0.1936 - mean_absolute_error: 0.1936 - val_loss: 0.0922 - val_mean_absolute_error: 0.0922 Epoch 5/100 10/10 [==============================] - 0s 4ms/step - loss: 0.1835 - mean_absolute_error: 0.1835 - val_loss: 0.0999 - val_mean_absolute_error: 0.0999 Epoch 6/100 10/10 [==============================] - 0s 5ms/step - loss: 0.1755 - mean_absolute_error: 0.1755 - val_loss: 0.1058 - val_mean_absolute_error: 0.1058 Epoch 7/100 10/10 [==============================] - 0s 5ms/step - loss: 0.1713 - mean_absolute_error: 0.1713 - val_loss: 0.1081 - val_mean_absolute_error: 0.1081 Epoch 8/100 1/10 [==>...........................] - ETA: 0s - loss: 0.1554 - mean_absolute_error: 0.1554Restoring model weights from the end of the best epoch: 3. 10/10 [==============================] - 0s 5ms/step - loss: 0.1669 - mean_absolute_error: 0.1669 - val_loss: 0.1127 - val_mean_absolute_error: 0.1127 Epoch 8: early stopping
# EarlyStopping(restore_best_weights=True) leaves the model at its BEST
# epoch, so report the minimum validation loss — the last epoch's value
# is, by construction, never the best when training stops early.
valMae = round(min(network.history['val_loss']), 2)
fig = go.Figure()
fig.add_trace(go.Scatter(y=network.history['loss'],
                         mode='lines',
                         name='Training Error'))
fig.add_trace(go.Scatter(y=network.history['val_loss'],
                         mode='lines',
                         name='Validation Error'))
fig.update_layout(yaxis_title='Mean Absolute Error',
                  xaxis_title='epoch',
                  title_text='Normalized MAE Validation = ' +
                             str(valMae))
fig.show()
# Evaluate the CNN on the held-out test set in the original price scale.
# (The extra reshape(-1, 1) flattens the conv model's prediction tensor.)
testNormPred = model.predict(testNormX)
testPred = sc.inverse_transform(testNormPred.reshape(-1, 1))
testY = sc.inverse_transform(testNormY.reshape(-1, 1))
testMae = tf.keras.metrics.mean_absolute_error(testY, testPred)
fig = go.Figure()
for series, label in ((testPred, 'Model Predictions on Test Set'),
                      (testY, 'Target Values for the Test Set')):
    fig.add_trace(go.Scatter(y=series.reshape(-1,), mode='markers', name=label))
fig.update_layout(title_text='Unnormalized MAE Test = '
                             + str(np.mean(testMae)))
fig.show()